@InProceedings{MattosMenaCesaVelh:2010:3DLiFa,
               author = "Mattos, Andr{\'e}a Britto and Mena-Chalco, Jes{\'u}s Pascual and 
                         Cesar Junior, Roberto Marcondes and Velho, Luiz Carlos Pacheco 
                         Rodrigues",
          affiliation = "{Universidade de S{\~a}o Paulo} and {Universidade de S{\~a}o 
                         Paulo} and {Universidade de S{\~a}o Paulo} and {Instituto 
                         Nacional de Matem{\'a}tica Pura e Aplicada}",
                title = "3D linear facial animation based on real data",
            booktitle = "Proceedings...",
                 year = "2010",
               editor = "Bellon, Olga and Esperan{\c{c}}a, Claudio",
         organization = "Conference on Graphics, Patterns and Images, 23. (SIBGRAPI)",
            publisher = "IEEE Computer Society",
              address = "Los Alamitos",
             keywords = "Computer graphics, facial animation, 3D reconstruction.",
             abstract = "In this paper we introduce a Facial Animation system using real 
                         three-dimensional models of people, acquired by a 3D scanner. We 
                         consider a dataset composed by models displaying different facial 
                         expressions and a linear interpolation technique is used to 
                         produce a smooth transition between them. One-to-one 
                         correspondences between the meshes of each facial expression are 
                         required in order to apply the interpolation process. Instead of 
                         focusing in the computation of dense correspondence, some points 
                         are selected and a triangulation is defined, being refined by 
                         consecutive subdivisions, that compute the matchings of 
                         intermediate points. We are able to animate any model of the 
                         dataset, given its texture information for the neutral face and 
                         the geometry information for all the expressions along with the 
                         neutral face. This is made by computing matrices with the 
                         variations of every vertex when changing from the neutral face to 
                         the other expressions. The knowledge of the matrices obtained in 
                         this process makes it possible to animate other models given only 
                         the texture and geometry information of the neutral face. 
                         Furthermore, the system uses 3D reconstructed models, being 
                         capable of generating a three-dimensional facial animation from a 
                         single 2D image of a person. Also, as an extension of the system, 
                         we use artificial models that contain expressions of visemes, that 
                         are not part of the expressions of the dataset, and their 
                         displacements are applied to the real models. This allows these 
                         models to be given as input to a speech synthesis application in 
                         which the face is able to speak phrases typed by the user. 
                         Finally, we generate an average face and increase the 
                         displacements between a subject from the dataset and the average 
                         face, creating, automatically, a caricature of the subject.",
  conference-location = "Gramado, RS, Brazil",
      conference-year = "30 Aug.-3 Sep. 2010",
                  doi = "10.1109/SIBGRAPI.2010.44",
                  url = "http://dx.doi.org/10.1109/SIBGRAPI.2010.44",
             language = "en",
                  ibi = "8JMKD3MGPBW34M/386A48P",
                  url = "http://urlib.net/ibi/8JMKD3MGPBW34M/386A48P",
           targetfile = "3D Linear Facial Animation Based on Real Data.pdf",
        urlaccessdate = "2024, Apr. 29"
}
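
The abstract describes a displacement-based linear blending scheme: per-vertex offsets from the neutral face to each expression are stored as matrices and then interpolated, transferred to other models, or exaggerated for caricature. Below is a minimal Python/NumPy sketch of that idea, not the authors' code; the array shapes, function names, blending weight, and exaggeration factor are illustrative assumptions.

    # Minimal sketch of displacement-based linear facial animation
    # (illustrative; meshes assumed to share one-to-one vertex correspondence).
    import numpy as np

    def expression_displacements(neutral, expressions):
        """Per-vertex offsets from the neutral face to each expression."""
        return {name: expr - neutral for name, expr in expressions.items()}

    def animate(neutral, displacement, w):
        """Linear interpolation: w=0 gives the neutral face, w=1 the expression."""
        return neutral + w * displacement

    def caricature(subject, average, k=1.5):
        """Exaggerate the subject's deviation from an average face (k > 1)."""
        return average + k * (subject - average)

    # Toy usage with random vertex data (V vertices, xyz coordinates).
    V = 1000
    neutral = np.random.rand(V, 3)
    smile = neutral + 0.01 * np.random.randn(V, 3)      # hypothetical expression mesh
    D = expression_displacements(neutral, {"smile": smile})
    frame = animate(neutral, D["smile"], w=0.5)          # halfway to the smile

Because the displacements are decoupled from any particular subject, the same matrices can, under the correspondence assumption, be added to a different person's neutral mesh to animate it, which is the transfer step the abstract mentions.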